home
***
CD-ROM
|
disk
|
FTP
|
other
***
search
/
Language/OS - Multiplatform Resource Library
/
LANGUAGE OS.iso
/
pcr
/
pcr4_4.lha
/
DIST
/
gc
/
GCmark.c
< prev
next >
Wrap
C/C++ Source or Header
|
1991-12-03
|
27KB
|
925 lines
/* begincopyright
Copyright (c) 1988,1990 Xerox Corporation. All rights reserved.
Use and copying of this software and preparation of derivative works based
upon this software are permitted. Any distribution of this software or
derivative works must comply with all applicable United States export
control laws. This software is made available AS IS, and Xerox Corporation
makes no warranty about the software, its performance or its conformity to
any specification. Any person obtaining a copy of this software is requested
to send their name and post office or electronic mail address to:
PCR Coordinator
Xerox PARC
3333 Coyote Hill Rd.
Palo Alto, CA 94304
Parts of this software were derived from code bearing the copyright notice:
Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
This material may be freely distributed, provided this notice is retained.
This material is provided as is, with no warranty expressed or implied.
Use at your own risk.
endcopyright */
# include <signal.h>
# include <sys/types.h>
# include <sys/times.h>
# include <sys/time.h>
# include <sys/resource.h>
# include <sys/timeb.h>
# include "xr/GCPrivate.h"
# include "xr/Threads.h"
#define I_HOLD_ML(ml) (((XR_ML)(ml))->ml_holder == XR_currThread)
# define COUNT_CACHE_HITS
# undef COUNT_CACHE_HITS
/*
* Derived from the Boehm-Demers collector, with some of C. Hauser's code,
* and probably some of Mark Weiser's code.
* Boehm, April 17, 1990 5:34:55 pm PDT
*/
/*
* This file contains the functions:
* GC_mark(alignment)
* GC_mark_all(b,t,alignment,mark_clean)
* GC_get_current_sp()
*/
/* Leaving these defined enables output to stderr. In order of */
/* increasing verbosity: */
#define DEBUG /* Verbose debugging output */
#undef DEBUG
#define DEBUG2 /* EXTREMELY verbose debugging output */
#undef DEBUG2
/* Return a rough approximation to the stack pointer. A hack, */
/* but it's semi-portable. */
word * GC_get_current_sp()
{
    word x;
    /* NOTE(review): returning the address of a local is formally     */
    /* undefined behavior; it is used here only as a rough stack mark */
    /* (see comment above), never dereferenced as a live object.      */
    return(&x);
}
/*
 * Proc to see how much page swapping is going on ... returns the
 * cumulative number of major page faults (those requiring real I/O)
 * taken by this process, or 0 if the count cannot be obtained.
 */
unsigned long GC_check_faults()
{
    struct rusage usage;

    /* Must be the raw system call: NOT XR_GetRUsage(...)! */
    if (getrusage(RUSAGE_SELF, &usage) != 0) {
        /* Previously a failure left usage uninitialized and we read  */
        /* garbage (undefined behavior); report "no faults" instead.  */
        return(0);
    }
    return(usage.ru_majflt);
}
/*
* Convert the mark stack to bucket form.
*/
/*
* the performance is insensitive to the SEGSIZE and WINDOW
* for experiments tried this far.
*/
#define SEGSIZE 0x100000
    /* size of the segment recorded in each hash bucket */
static unsigned long WINDOW = 4;
    /* number of segments inspected together during marking */
    /* not #define'd for ease of experimentation */
#define BOUNDARY 0xffffffff
    /* non-pointer value marking edges of buckets */
    /* NOTE(review): BOUNDARY (and the 0x10000000 range check in */
    /* allocbucket) assume 32-bit longs and pointers.            */
static unsigned long **alloc;
    /*
     * all allocation in this file is done below GC_mark_stack_top,
     * by moving alloc to lower addresses.
     */
static unsigned long **buckets;
    /*
     * buckets is the address of an array of pointers to hashbuckets.
     * each bucket contains possible pointers into a single SEGSIZE segment
     * of the heap.
     */
static int removals;
static int insertions;
    /*
     * insertions-removals is number of candidates remaining to be
     * processed. Kept separately for statistics gathering.
     */
static char * GC_my_heaplim;
    /*
     * GC_heaplim when marking started. Pointers beyond this can safely
     * be ignored, since they must have been created later. Thus the page
     * in which they were stored will have been dirtied.
     */
#ifdef PRINTSTATS
static unsigned bucketsallocated;   /* buckets created during this mark phase */
#endif
#define PTRSPERBUCKET 252
    /*
     * no experimentation has been done on the sensitivity of performance to
     * this value.
     */
/* One hash bucket: an array of candidate-pointer slots bracketed by */
/* BOUNDARY sentinels, chained both ways to overflow buckets serving */
/* the same heap segment.                                            */
typedef struct {
    unsigned long *prev;
        /* Points to last pointer slot in prev bucket */
    unsigned long topmark;                  /* BOUNDARY sentinel */
    unsigned long *ptrs[PTRSPERBUCKET];     /* candidate pointer slots */
    unsigned long bottommark;               /* BOUNDARY sentinel */
    unsigned long *next;
        /* If in use, points to topmark of next bucket. */
        /* If free, points to beginning of next free bucket */
} hashbucket;
static hashbucket * bucket_free_list = NIL;  /* linked through the next field */
/* Obtain a hashbucket: reuse one from the free list when available, */
/* otherwise carve fresh space off the bottom of the mark stack.     */
hashbucket * allocbucket()
{
    hashbucket * result;

    if (bucket_free_list == NIL) {
        /* claim space for a bucket from the allocation stack */
        alloc -= (sizeof(hashbucket)/sizeof(unsigned long *));
        result = (hashbucket *)alloc;
    } else {
        result = bucket_free_list;
        bucket_free_list = (hashbucket *)(bucket_free_list -> next);
    }
    /* sanity check: bucket addresses must lie in the expected range */
    if ((unsigned long)result < 0x10000000) GC_abort("allocbucket");
    return(result);
}
/* Return bucket b to the free list for reuse by allocbucket(). */
void freebucket(b)
hashbucket *b;
{
    hashbucket * old_head = bucket_free_list;

    b -> next = (unsigned long *) old_head;
    bucket_free_list = b;
}
static unsigned long *newbucket(prevbucket)
unsigned long *prevbucket;
/*
 * Allocate and initialize an empty hash bucket whose prev field points
 * back at prevbucket; returns the address of the bucket's topmark slot
 * (which is how an empty bucket is denoted).
 */
{
    hashbucket *nb;
    int slot;

    nb = allocbucket();
    /* chain buckets hashing to the same position */
    nb->prev = prevbucket;
    nb->next = NIL;
    /* insert boundary marks and initialize contents to empty */
    nb->bottommark = BOUNDARY;
    nb->topmark = BOUNDARY;
    for (slot = PTRSPERBUCKET; --slot >= 0; ) {
        nb->ptrs[slot] = NIL;
    }
#ifdef PRINTSTATS
    ++bucketsallocated;
#endif
    /* return a pointer to the topmark slot (making this bucket empty) */
    return (&(nb->topmark));
}
static void insert(p)
unsigned long p;
/*
 * inserts candidate pointer p in the bucket structure.
 * The bucket is chosen by which SEGSIZE segment of the heap p points
 * into; a full bucket grows a chained overflow bucket on demand.
 */
{
    unsigned long **b;
    unsigned long *slotptr;
    unsigned long heapoffset = p - (unsigned) GC_heapstart;

    /* Pointers at or beyond the heap limit recorded when marking     */
    /* started were created later; the page holding them was dirtied, */
    /* so they may safely be ignored here (see GC_my_heaplim).        */
    if (p >= (unsigned long)GC_my_heaplim) return;
    /* Note that the bucket we use corresponds to the actual pointer */
    /* we found, and not necessarily the start address of the heap */
    /* block. This may cause the marking process to exhibit */
    /* slightly less locality. But it saves some moderately */
    /* expensive calculation here. */
    /* (The unused "struct hblk * h = HBLKPTR(p);" local that used to */
    /* be computed here has been removed.)                            */
    b = buckets+heapoffset/SEGSIZE;
        /* candidate bucket */
    slotptr = ++(*b);
        /* candidate position in the hashbucket to record p */
    if (*slotptr == BOUNDARY) { /* full: slotptr is pointing
                                   at the bottom mark */
        slotptr++;
            /* move along to the next field */
        if (*slotptr == (unsigned) NIL) { /* no next bucket yet */
            *slotptr = (unsigned) newbucket(slotptr-2);
                /* create one and chain together; slotptr-2 is the */
                /* last ptrs slot of this bucket (the prev target)  */
        };
        /* assert: *slotptr == BOUNDARY; (the topmark) */
        slotptr = ((unsigned long *) (*slotptr))+1;
            /* move to the first real slot */
        *b = slotptr;
            /* link into hash table */
    };
    *slotptr = p;
        /* what we really wanted to do from the start */
    insertions++;
}
static unsigned long remove(b)
unsigned long **b;
/*
 * return a candidate pointer from bucket *b points inside, NIL if none.
 * *b points at the slot holding the most recently inserted candidate
 * (or at a topmark sentinel when this chain segment is drained).
 * NOTE(review): this file-local `remove' shadows the stdio remove().
 */
{
    unsigned long *slotptr;

    slotptr = *b;
    if (slotptr == NIL) return (unsigned long)NIL;
        /* no heap blocks in this bucket */
    if (*slotptr == BOUNDARY) { /* at the topmark */
        /* slotptr-1 is the prev field, i.e. the start of this bucket */
        if (*(slotptr-1) == (unsigned long)NIL) {
            return (unsigned long)NIL; /* no previous bucket */
        } else {
            hashbucket * ob = (hashbucket *)(slotptr - 1);
            slotptr = (unsigned long *) *(slotptr-1); /* move to previous bucket */
            if ((ob -> next) != NIL) {
                /* We leave ob around to avoid lots of allocation around boundary */
                /* We deallocate next bucket, to avoid wasting stack space. */
                /* ob->next points at the next bucket's topmark; backing   */
                /* up one slot yields that bucket's true start address.    */
                freebucket(ob -> next - 1);
                ob -> next = NIL;
            }
        }
    };
    *b = slotptr-1; /* remove an item from the bucket */
    removals++;
    return *slotptr; /* return the item */
}
static void docandidate();
# ifdef USE_HEAP
--> The careful marking stuff currently assumes that we use the process
stack for marking
# endif
static void carefulmark(alignment)
int alignment;
/*
 * for marking when page faults are occurring;
 * Converts the mark stack into per-segment hash buckets, then drains a
 * sliding WINDOW of segments at a time so that marking touches a small
 * region of the heap at once (better paging behavior).
 */
{
    unsigned nbuckets;
    unsigned long **bucketlim;          /* one past the bucket table */
    register struct obj *p; /* pointer to current object to be marked */
    long heapsize; /* Unlike the global, this includes blocks not administered */
                   /* by us. */
    unsigned passcount = 0;             /* sweeps over the bucket table */
    unsigned startfaults = GC_check_faults();
    unsigned markfaults;

    if (GC_mark_stack_top == GC_mark_stack_bottom) return;
    insertions = removals = 0;
#   ifdef PRINTSTATS
        /* bucketsallocated is only declared under PRINTSTATS; the   */
        /* unconditional assignment here used to break non-PRINTSTATS */
        /* builds.                                                    */
        bucketsallocated = 0;
#   endif
    GC_my_heaplim = GC_heaplim;
    heapsize = GC_my_heaplim - GC_heapstart;
    alloc = (unsigned long **) GC_mark_stack_top;
    bucket_free_list = NIL;
    {
        /* construct the empty hash table for candidate pointers */
        unsigned long **b;
        nbuckets = (heapsize + SEGSIZE-1) / SEGSIZE;
        bucketlim = alloc;
        buckets = (alloc = alloc-nbuckets); /* claim space for nbuckets */
        for (b = buckets; b < bucketlim; b++) {
            *b = newbucket(NIL);
        };
    }
    { /* insert all the initial candidates */
        register word * lim;
        register word * p;
        if (sizeof(hashbucket) % sizeof(word) != 0) GC_abort("carefulmark 0");
        while (GC_mark_stack_top < GC_mark_stack_bottom) {
            lim = (word *)(((char *)GC_mark_stack_top) + sizeof (hashbucket));
            p = GC_mark_stack_top;
            if (lim > GC_mark_stack_bottom) {
                /* less than one bucket's worth remains: insert it all */
                while (p < GC_mark_stack_bottom) insert (*p++);
            } else {
                while (p < lim) insert (*p++);
                /* Recycle this section of the mark stack for hash buckets */
                freebucket((hashbucket *)GC_mark_stack_top);
            }
            GC_mark_stack_top = p;
        };
    };
    {
        /*
         * Now all the items from the initial stack have been put in buckets;
         * Start processing them;
         */
        while (insertions != removals) {
            unsigned long **lowlim, **highlim;
                /* limits of current confined analysis */
            unsigned long **b; /* current hash slot to inspect */
#           ifdef PRINTSTATS
                ++passcount;
#           endif
            highlim = buckets+WINDOW;
            if (highlim >= bucketlim) highlim = buckets;
            lowlim = b = buckets;
            while (lowlim < bucketlim) {
                p = (struct obj *) remove(b);
                if (p == (struct obj *)NIL) {
                    b++;
                    if (b == bucketlim) b = buckets;
                    if (b == highlim) {
                        /* window drained: slide it past empty buckets */
                        while (lowlim < bucketlim &&
                               (*lowlim == NIL
                                || (unsigned) (**lowlim) == BOUNDARY)) {
                            highlim++;
                            if (highlim==bucketlim) highlim=buckets;
                            lowlim++;
                        }; /* while ((lowlim<bucketlim) . . . */
                        b = lowlim;
                    }; /* if (b==highlim) */
                } else /* p != NIL */ {
                    docandidate(p, alignment);
                };
            }; /* while (lowlim < bucketlim) */
        }; /* while (insertions != removals) */
#       ifdef PRINTSTATS
        {
            markfaults = GC_check_faults() - startfaults;
            GC_markfaults += markfaults;
            GC_printf("Ending mark phase. Faults: %d, Insertions: %d\n",
                      markfaults, insertions);
            GC_printf("Buckets: %d, Passes: %d\n",bucketsallocated, passcount);
        };
#       endif
    };
} /* carefulmark */
static void docandidate(p, alignment)
struct obj *p;
int alignment;
/*
 * Validate candidate pointer p; if it denotes a valid unmarked object,
 * set its mark bit and insert() any plausible pointers found inside it
 * back into the bucket structure.  This is the bucket-marking analogue
 * of the inner loop of GC_mark().
 */
{
    long sz;            /* object size in words; set by GCcheck_ptr.c   */
    long word_no;       /* word offset of object within its block; ditto */
    struct hblk * h;    /* block containing p; ditto                     */
    bool is_atomic;     /* ditto */
#   ifdef SEPARATE_HEADERS
        register struct hblkhdr * hhdr;
#   endif
#   ifdef COUNT_CACHE_HITS
        /* Hard to print here, but must be defined for GCcheck_ptr.c */
        long map_cache_hits = 0;
        long map_cache_misses = 0;
#   endif
    /* Check whether p points to a valid unmarked object. Continue to the */
    /* next iteration if not. Set h to point to the beginning of the block */
    /* containing p. Set word_no to the offset of the beginning of the */
    /* object. Set sz to the size of the object in words. */
    /* Set is_atomic and hhdr. */
#   define CONTINUE_M return
#   ifdef BLACK_LIST
#       define CONTINUE goto register_bad_c
#   else
#       define CONTINUE return
#   endif
#   include "GCcheck_ptr.c"
#   undef CONTINUE
#   undef CONTINUE_M
#   ifdef GATHERSTATS
        GC_my_objects_in_use++;
#   endif
    set_mark_bit(h, word_no);
    if (is_atomic) {
        /* Atomic object: contains no pointers, so nothing to scan */
#       ifdef GATHERSTATS
            GC_my_atomic_in_use += sz;
#       endif
        return;
    };
    GC_my_composite_in_use += sz;
    {
        /* Mark from fields inside the object */
        register struct obj ** q;
        register struct obj * r;
        register long lim; /* Should be struct obj **, but we're out of */
                           /* A registers on a 68000. */
#       ifdef INTERIOR_POINTERS
            /* Adjust p, so that it's properly aligned */
            p = ((struct obj *)(((word *)h) + word_no));
#       endif
#       ifdef UNALIGNED
            /* -3 keeps the final (possibly unaligned) word inside the object */
            lim = ((long)(&(p -> obj_component[sz]))) - 3;
#       else
            lim = (long)(&(p -> obj_component[sz]));
#       endif
        for (q = (struct obj **)(&(p -> obj_component[0]));
             q < (struct obj **)lim;) {
            r = *q;
            if (quicktest(r)) {
#               ifdef DEBUG2
                    GC_vprintf("Found plausible nested pointer");
                    GC_vprintf(": 0x%X inside 0x%X at 0x%X\n", r, p, q);
#               endif
                insert(r);
            }
#           ifdef UNALIGNED
                q = ((struct obj **)(((long)q)+alignment));
#           else
                q++;
#           endif
        }
    }; /* mark from fields inside object */
#   ifdef BLACK_LIST
    return;
register_bad_c:
    /* p looks like a pointer, but doesn't point to an */
    /* object. */
    if ((unsigned long)p
        >= (unsigned long) ((struct hblk *) GC_heapstart + MAP_SIZE)) {
        return;
    }
    GC_add_to_black_list(p);
#   endif
} /* docandidate */
static unsigned toomanyfaults = 1000;  /* fault-count threshold; not referenced in this file -- presumably read elsewhere, verify before removing */
/*
* Clear mark bits in all allocated heap blocks.
* Assumes I do not hold GC_allocate_ml, but all reclaim lists are
* empty. Thus nobody will look at mark bits while we're
* running, and hb_busy will not get set. It may be
* set when we start, but then we simply wait.
*/
void GC_clear_marks()
{
    register int j;
    register struct hblk **p;   /* NOTE(review): unused in this function */
    register struct hblk *q;

    /* The caller must NOT already hold the allocation lock. */
    if (I_HOLD_ML(&GC_allocate_ml)) {
        XR_Panic("GC_clear_marks 0");
    }
    XR_MonitorEntry(&GC_allocate_ml);
    for (q = (struct hblk *) GC_heapstart; ((char*)q) < GC_heaplim; q++)
        if (is_hblk(q)) {
            while (hb_busy(q)) {
                /* There are better ways to synchronize. But empirically */
                /* this happens about .2 times during the life of a Cedar world. */
                /* Drop the lock while pausing so the reclaimer can finish. */
                XR_MonitorExit(&GC_allocate_ml);
                GC_printf("GC_clear_marks waiting for reclaim\n");
                XR_PauseAbortable(1);
                XR_MonitorEntry(&GC_allocate_ml);
            }
#           ifdef VISUALIZE
                /* redisplay all marked objects as allocated */
                undisplay_marks(q);
#           endif
            /* zero this block's entire mark-bit array */
            for (j = 0; j < MARK_BITS_SZ; j++) {
                hb_marks(q)[j] = 0;
            }
        }
    XR_MonitorExit(&GC_allocate_ml);
    /* In use counts track number of marked words. Reset private counters. */
    GC_my_atomic_in_use = GC_my_composite_in_use = 0;
    GC_my_objects_in_use = 0;
}
/* Mark all objects corresponding to pointers between GC_mark_stack_bottom */
/* and GC_mark_stack_top. Assume that nested pointers are aligned */
/* on alignment-byte boundaries. */
/* The mark stack is empty when mark returns. */
void GC_mark(alignment)
int alignment;
/*
 * Pop candidate pointers off the mark stack; for each one that checks
 * out (via the textually included GCcheck_ptr.c) as a valid unmarked
 * object, set its mark bit and push plausible pointers found inside it
 * back onto the stack.  Delegates to carefulmark() when
 * GC_markCarefully is set.
 */
{
    register long sz;           /* object size in words; set by GCcheck_ptr.c */
    extern char end, etext;
    register struct obj *p; /* pointer to current object to be marked */
    unsigned startfaults;
    unsigned markfaults;
#   ifdef COUNT_CACHE_HITS
        /* accumulated inside GCcheck_ptr.c */
        long map_cache_hits = 0;
        long map_cache_misses = 0;
#   endif
#   ifdef SEPARATE_HEADERS
        register struct hblkhdr * hhdr;
#   endif
    bool is_atomic;             /* set by GCcheck_ptr.c */
    /* Register copies of frequently referenced globals */
    /* Referenced primarily from inside macros. */
    register char * GC_heapstart_reg;
    register word * GC_mark_stack_top_reg;
#   ifdef GATHERSTATS
        register long GC_my_objects_in_use_reg;
#   endif
#   ifdef VISUALIZE
        window_update();
#   endif
    if ( GC_markCarefully ) {
        /* paging is heavy: use the locality-preserving bucket algorithm */
        carefulmark(alignment);
        return;
    }
    startfaults = GC_check_faults();
    /* Set up "global" registers */
    GC_heapstart_reg = GC_heapstart;
    GC_mark_stack_top_reg = GC_mark_stack_top;
    /* From here to the matching #undefs, all references to these two  */
    /* globals (including those inside macros) hit the register copies. */
#   define GC_mark_stack_top GC_mark_stack_top_reg
#   define GC_heapstart GC_heapstart_reg
#   ifdef GATHERSTATS
        GC_my_objects_in_use_reg = GC_my_objects_in_use;
#   endif
    while (GC_mark_stack_top != GC_mark_stack_bottom) {
        register long word_no;
        register struct hblk * h;
#       ifdef USE_STACK
            p = (struct obj *)(*GC_mark_stack_top++);
#       else
#         ifdef USE_HEAP
            p = (struct obj *)(*GC_mark_stack_top--);
#         else
            --> fixit <--
#         endif
#       endif
        /* Check whether p points to a valid unmarked object. Continue to the */
        /* next iteration if not. Set h to point to the beginning of the block */
        /* containing p. Set word_no to the offset of the beginning of the */
        /* object. Set sz to the size of the object in words. */
        /* Set is_atomic and hhdr. */
#       define CONTINUE_M continue
#       ifdef BLACK_LIST
#           define CONTINUE goto register_bad;
#       else
#           define CONTINUE continue
#       endif
#       include "GCcheck_ptr.c"
#       undef CONTINUE
#       undef CONTINUE_M
#       ifdef DEBUG2
            GC_vprintf("*** set bit for heap %x, word %x\n",h,word_no);
#       endif
#       ifdef SEPARATE_HEADERS
            set_mark_bit_from_hdr(hhdr, word_no);
#       else
            set_mark_bit(h, word_no);
#       endif
#       ifdef VISUALIZE
            displayMark(p, sz);
#       endif
#       ifdef GATHERSTATS
            GC_my_objects_in_use_reg++;
#       endif
        if (is_atomic) {
            /* Atomic object: contains no pointers, nothing to scan */
#           ifdef GATHERSTATS
                GC_my_atomic_in_use += sz;
#           endif
            continue;
        }
        GC_my_composite_in_use += sz;
        {
            /* Mark from fields inside the object */
            register struct obj ** q;
            register struct obj * r;
            register long lim; /* Should be struct obj **, but we're out of */
                               /* A registers on a 68000. */
#           ifdef INTERIOR_POINTERS
                /* Adjust p, so that it's properly aligned */
                p = ((struct obj *)(((word *)h) + word_no));
#           endif
#           ifdef UNALIGNED
                /* -3 keeps the last (possibly unaligned) word inside the object */
                lim = ((long)(&(p -> obj_component[sz]))) - 3;
#           else
                lim = (long)(&(p -> obj_component[sz]));
#           endif
            for (q = (struct obj **)(&(p -> obj_component[0]));
                 q < (struct obj **)lim;) {
                r = *q;
                if (quicktest(r)) {
#                   ifdef DEBUG2
                        GC_vprintf("Found plausible nested pointer");
                        GC_vprintf(": 0x%X inside 0x%X at 0x%X\n", r, p, q);
#                   endif
                    PUSH_MS(((word)r));
                }
#               ifdef UNALIGNED
                    q = ((struct obj **)(((long)q)+alignment));
#               else
                    q++;
#               endif
            }
        }
#       ifdef VISUALIZE
            /* show marked objects */
            /* NOTE(review): i, hbp, plim, offset and hblkmap are not */
            /* declared in this function -- this VISUALIZE section    */
            /* looks stale; confirm before enabling VISUALIZE.        */
            for (i = 0; i < MAP_SIZE; i++) {
                if (hblkmap[i] == HBLK_VALID && GC_dirty_bits[i+offset]) {
                    hbp = (struct hblk *)(HBLKSIZE * i + GC_heapstart);
                    sz = hb_sz(hbp);
                    if (sz < 0) {
                        continue;
                    }
                    if (sz > MAXOBJSZ &&
                        mark_bit(hbp, (hbp -> hb_body) - ((word *)(hbp))) ) {
                        displayMark(hbp -> hb_body, sz);
                    } else {
                        /* Small composite objects */
                        p = (word *)(hbp->hb_body);
                        word_no = p - ((word *)hbp);
                        plim = (word *)((((unsigned)hbp) + HBLKSIZE)
                                        - WORDS_TO_BYTES(sz));
                        for (; p <= plim; p += sz, word_no += sz) {
                            if (mark_bit(hbp, word_no)) {
                                displayMark(p, sz);
                            }
                        }
                    }
                }
            }
#       endif
#       ifdef BLACK_LIST
        continue;
register_bad:
        /* p looks like a pointer, but doesn't point to an */
        /* object. */
        if ((unsigned long)p
            >= (unsigned long) ((struct hblk *) GC_heapstart + MAP_SIZE)) {
            continue;
        }
        GC_add_to_black_list(p);
#       endif
    }
#   undef GC_heapstart
#   undef GC_mark_stack_top
    /* write the register copies back to the real globals */
    GC_mark_stack_top = GC_mark_stack_top_reg;
#   ifdef GATHERSTATS
        GC_my_objects_in_use = GC_my_objects_in_use_reg;
#   endif
    markfaults = GC_check_faults()-startfaults;
    GC_markfaults += markfaults;
#   ifdef COUNT_CACHE_HITS
        GC_vprintf("%d map cache hits, %d misses\n",
                   map_cache_hits, map_cache_misses);
#   endif
#   ifdef PRINTSTATS
        if (markfaults > 0) {
            GC_vprintf("Generated %d page faults during marking\n", markfaults);
        }
#   endif
}
static void GC_mark_all_worker();
static void GC_mark_all_with_coercion_worker();
/*********************************************************************/
/* Push all possible pointers between b and t onto the mark stack. */
/* A subsequent call to mark will then */
/* mark all locations reachable via pointers located between b and t */
/* b is the first location to be checked. t is one past the last */
/* location to be checked. */
/* Assume that pointers are aligned on alignment-byte */
/* boundaries. */
/* Treat clean pages or pages with unknown dirty status according */
/* to whether mark_clean is ALL_POINTERS, POSSIBLY_DIRTY_POINTERS, */
/* or DEFINITELY_DIRTY_POINTERS. */
/* If coerce_pointers is TRUE, then try to coerce pointers to */
/* valid object addresses berfore pushing them. */
/* The coercion involves translating an arbitrary pointer to the */
/* middle of an object into a pointer to the object beginning. */
/*********************************************************************/
void GC_mark_all(b, t, alignment, mark_clean, coerce_pointers)
word * b;
word * t;
int alignment;
int mark_clean;
bool coerce_pointers;
{
#   ifdef DEBUG
        GC_vprintf("Checking for pointers between 0x%X and 0x%X\n",
                   b, t);
#   endif
    /* Round b down so it is properly aligned */
#   ifdef UNALIGNED
        if (alignment == 2) {
            b = (word *)(((long) b) & ~1);
        } else if (alignment == 4) {
            b = (word *)(((long) b) & ~3);
        } else if (alignment != 1) {
            GC_abort("Bad alignment parameter to GC_mark_all");
        }
#   else
        b = (word *)(((long) b) & ~3);
#   endif
    if (mark_clean == ALL_POINTERS) {
        /* Dirty status irrelevant: scan the whole range */
        if (coerce_pointers) {
            GC_mark_all_with_coercion_worker(b, t, alignment);
        } else {
            GC_mark_all_worker(b, t, alignment);
        }
    } else {
        register unsigned long VD_base_reg
            = (unsigned long) VD_base;
        if (b >= (word *)VD_base_reg && t <= (word *)GC_heaplim) {
            /* Range is covered by the dirty-bit map: walk it one heap */
            /* block at a time, scanning only blocks marked dirty.     */
            register unsigned long l = (unsigned long)b;
            register unsigned long u = ((unsigned long)HBLKPTR(l)) + HBLKSIZE;
            register int index;
            while (l < (unsigned long)t) {
                if (u > (unsigned long)t) {
                    u = (unsigned long)t;
                }
                index = divHBLKSZ(((unsigned long)l)
                                  - ((unsigned long) VD_base_reg));
                if (GC_dirty_bits[index]) {
                    /* NOTE(review): l and u are unsigned long while the */
                    /* K&R workers declare word * parameters -- accepted */
                    /* without prototypes; would need casts under ANSI C. */
                    if (coerce_pointers) {
                        GC_mark_all_with_coercion_worker(l, u, alignment);
                    } else {
                        GC_mark_all_worker(l, u, alignment);
                    }
                }
                l = u;
                u += HBLKSIZE;
            }
        } else if (mark_clean == POSSIBLY_DIRTY_POINTERS) {
            /* No dirty info for this range: conservatively scan it all */
            if (coerce_pointers) {
                GC_mark_all_with_coercion_worker(b, t, alignment);
            } else {
                GC_mark_all_worker(b, t, alignment);
            }
        } else if (mark_clean != DEFINITELY_DIRTY_POINTERS) {
            GC_abort("Bad argument to GC_mark_all\n");
        }
        /* DEFINITELY_DIRTY_POINTERS with no dirty info: nothing to scan */
    }
}
static void GC_mark_all_worker(b, t, alignment)
word *b;
word *t;
int alignment;
/*
 * Scan every word in [b, t) and push those passing quicktest()
 * (i.e. plausible heap pointers) onto the mark stack.
 */
{
    register word *p;
    register word r;
    register word *lim;
    /* Register copies of frequently referenced globals */
    /* Referenced primarily from inside macros (quicktest, PUSH_MS). */
    register char * GC_heapstart_reg = GC_heapstart;
    register word * GC_mark_stack_top_reg = GC_mark_stack_top;
#   define GC_heapstart GC_heapstart_reg
#   define GC_mark_stack_top GC_mark_stack_top_reg
    /* check all pointers in range and put on mark_stack if quicktest true */
    lim = t - 1 /* longword */;
    for (p = b; ((unsigned) p) <= ((unsigned) lim);) {
        /* Coercion to unsigned in the preceding appears to be necessary */
        /* due to a bug in the 4.2BSD C compiler. */
        r = *p;
        if (quicktest(r)) {
#           ifdef DEBUG2
                GC_vprintf("Found plausible pointer: %X\n", r);
#           endif
            PUSH_MS(r); /* push r onto the mark stack */
        }
#       ifdef UNALIGNED
            p = (word *)(((char *)p) + alignment);
#       else
            p++;
#       endif
    }
#   undef GC_heapstart
#   undef GC_mark_stack_top
    /* write the advanced stack top back to the real global */
    GC_mark_stack_top = GC_mark_stack_top_reg;
}
static void GC_mark_all_with_coercion_worker(b, t, alignment)
word *b;
word *t;
int alignment;
/*
 * Like GC_mark_all_worker, but each plausible pointer is first coerced
 * to the address of the beginning of the object it points into before
 * being pushed on the mark stack.
 */
{
    register word *p;
    register word r;
    register word *lim;
    register struct hblk * h;
    register long sz;
    register long word_no;
    /* Register copies of frequently referenced globals */
    /* Referenced primarily from inside macros. */
    register char * GC_heapstart_reg = GC_heapstart;
    register char * GC_heaplim_reg = GC_heaplim;
    register word * GC_mark_stack_top_reg = GC_mark_stack_top;
#   define GC_heapstart GC_heapstart_reg
#   define GC_mark_stack_top GC_mark_stack_top_reg
    /* check all pointers in range and try to coerce to object pointers. */
    /* Put resulting pointers on mark stack. */
    lim = t - 1 /* longword */;
    for (p = b; ((unsigned) p) <= ((unsigned) lim);) {
        r = *p;
        if (quicktest(r) && (char *)r < GC_heaplim_reg) {
#           ifdef DEBUG2
                GC_vprintf("Found plausible pointer: %X\n", r);
#           endif
            h = HBLKPTR(r);
            if (!is_hblk(h)) {
                /* Not at a block that starts an object run: walk back */
                /* through the block map to find the first block.      */
                char m = get_map(h);
                while (m > 0 && m < 0x7f) {
                    h -= m;
                    m = get_map(h);
                }
                if (!is_hblk(h)) {
                    goto next_p;    /* not a heap object; drop candidate */
                }
            }
            sz = hb_sz(h);
            if (sz < 0) sz = -sz;   /* take magnitude; negative sz is    */
                                    /* presumably an atomic tag -- verify */
            if (sz > MAXOBJSZ) {
                /* large object: point at the first word past the header */
                r = ((word)h) + HDR_BYTES;
            } else {
                /* small object: round down to the containing object start */
                word_no = WORD_NO(r, h);
                word_no = adjusted_word_no(word_no,sz);
                r = (word)((word *)h + word_no);
            }
            PUSH_MS(r); /* push r onto the mark stack */
        }
next_p:
#       ifdef UNALIGNED
            p = (word *)(((char *)p) + alignment);
#       else
            p++;
#       endif
    }
#   undef GC_heapstart
#   undef GC_mark_stack_top
    GC_mark_stack_top = GC_mark_stack_top_reg;
}